unsigned int ft_size = 0;
start_info_t *virt_startinfo_address;
unsigned long long time;
- l2_pgentry_t *l2tab;
+ l2_pgentry_t *l2tab, *l2start;
l1_pgentry_t *l1tab = NULL;
struct pfn_info *page = NULL;
net_ring_t *net_ring;
* filled in by now !!
*/
phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
- l2tab = map_domain_mem(phys_l2tab);
+ l2start = l2tab = map_domain_mem(phys_l2tab);
memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
if(dom == 0)
ft_size = frame_table_size;
- phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);
+ l2tab += l2_table_offset(virt_load_address);
for ( cur_address = start_address;
cur_address != (end_address + PAGE_SIZE + ft_size);
cur_address += PAGE_SIZE )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
+ if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
- l2tab = map_domain_mem(phys_l2tab);
- *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
- phys_l2tab += sizeof(l2_pgentry_t);
+ *l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
l1tab = map_domain_mem(phys_l1tab);
clear_page(l1tab);
l1tab += l1_table_offset(
page->type_count = page->tot_count = 1;
}
}
+ unmap_domain_mem(l1tab-1);
/* Pages that are part of page tables must be read-only. */
vaddr = virt_load_address + alloc_address - start_address;
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
- phys_l2tab += sizeof(l2_pgentry_t);
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(vaddr);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(vaddr);
+ l2tab++;
for ( cur_address = alloc_address;
cur_address != end_address;
cur_address += PAGE_SIZE )
{
- *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab);
- phys_l2tab += sizeof(l2_pgentry_t);
- l1tab = map_domain_mem(phys_l1tab);
+ unmap_domain_mem(l1tab-1);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l2tab++;
}
+ *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_l1_page_table;
page->tot_count++;
}
+ unmap_domain_mem(l1tab-1);
page->flags = dom | PGT_l2_page_table;
/* Map in the shared info structure. */
virt_shinfo_address = end_address - start_address + virt_load_address;
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(virt_shinfo_address);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(virt_shinfo_address);
*l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
+ unmap_domain_mem(l1tab);
/* Set up shared info area. */
rdtscll(time);
cur_address < virt_ftable_end_addr;
cur_address += PAGE_SIZE)
{
- phys_l2tab = pagetable_val(p->mm.pagetable) +
- (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
- l2tab = map_domain_mem(phys_l2tab);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(cur_address) * sizeof(l1_pgentry_t));
- l1tab = map_domain_mem(phys_l1tab);
+ l2tab = l2start + l2_table_offset(cur_address);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(cur_address);
*l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
+ unmap_domain_mem(l1tab);
ft_mapping += PAGE_SIZE;
}
}
(alloc_address - start_address - PAGE_SIZE + virt_load_address);
virt_stack_address = (unsigned long)virt_startinfo_address;
+ unmap_domain_mem(l2start);
+
/* Install the new page tables. */
__cli();
__asm__ __volatile__ (
#define MEM_LOG(_f, _a...) ((void)0)
#endif
+/* Domain 0 is allowed to submit requests on behalf of others. */
+#define DOMAIN_OKAY(_f) \
+ ((((_f) & PG_domain_mask) == current->domain) || (current->domain == 0))
+
/* 'get' checks parameter for validity before inc'ing refcnt. */
static int get_l2_table(unsigned long page_nr);
static int get_l1_table(unsigned long page_nr);
static int get_page(unsigned long page_nr, int writeable);
static int inc_page_refcnt(unsigned long page_nr, unsigned int type);
/* 'put' does no checking because if refcnt not zero, entity must be valid. */
-static int put_l2_table(unsigned long page_nr);
+static void put_l2_table(unsigned long page_nr);
static void put_l1_table(unsigned long page_nr);
static void put_page(unsigned long page_nr, int writeable);
static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
if ( page_nr >= max_page )
{
MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
- return(-1);
+ return -1;
}
page = frame_table + page_nr;
flags = page->flags;
- if ( (flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(flags) )
{
MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
- return(-1);
+ return -1;
}
if ( (flags & PG_type_mask) != type )
{
MEM_LOG("Page %08lx bad type/count (%08lx!=%08x) cnt=%ld",
page_nr << PAGE_SHIFT,
flags & PG_type_mask, type, page_type_count(page));
- return(-1);
+ return -1;
}
page->flags |= type;
}
get_page_tot(page);
- return(get_page_type(page));
+ return get_page_type(page);
}
/* Return new refcnt, or -1 on error. */
if ( page_nr >= max_page )
{
MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
- return(-1);
+ return -1;
}
page = frame_table + page_nr;
- if ( (page->flags & (PG_type_mask | PG_domain_mask)) !=
- (type | current->domain) )
+ if ( !DOMAIN_OKAY(page->flags) ||
+ ((page->flags & PG_type_mask) != type) )
{
MEM_LOG("Bad page type/domain (dom=%ld) (type %ld != expected %d)",
page->flags & PG_domain_mask, page->flags & PG_type_mask,
type);
- return(-1);
+ return -1;
}
ASSERT(page_type_count(page) != 0);
if ( (ret = put_page_type(page)) == 0 ) page->flags &= ~PG_type_mask;
put_page_tot(page);
- return(ret);
+ return ret;
+}
+
+
+/* We allow an L2 table to map itself, to achieve a linear pagetable. */
+/* NB. There's no need for a put_twisted_l2_table() function!! */
+/* entry_pfn: frame number of the L2 table the entry is being installed in. */
+/* l2e:       the candidate L2 entry.                                       */
+/* Returns 0 iff l2e is an acceptable self-map (read-only and pointing back */
+/* at entry_pfn itself); -1 otherwise.  No reference counts are taken.      */
+static int get_twisted_l2_table(unsigned long entry_pfn, l2_pgentry_t l2e)
+{
+ unsigned long l2v = l2_pgentry_val(l2e);
+
+ /* Clearly the mapping must be read-only :-) */
+ if ( (l2v & _PAGE_RW) )
+ {
+ MEM_LOG("Attempt to install twisted L2 entry with write permissions");
+ return -1;
+ }
+
+ /* This is a sufficient final check. */
+ /* (Any mapping of an L2 table other than itself is rejected here.) */
+ if ( (l2v >> PAGE_SHIFT) != entry_pfn )
+ {
+ MEM_LOG("L2 tables may not map _other_ L2 tables!\n");
+ return -1;
+ }
+
+ /* We don't bump the reference counts. */
+ return 0;
}
int i, ret=0;
ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( ret != 0 ) return (ret < 0) ? ret : 0;
/* NEW level-2 page table! Deal with every PDE in the table. */
p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
{
MEM_LOG("Bad L2 page type settings %04lx",
l2_pgentry_val(l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE));
- return(-1);
+ ret = -1;
+ goto out;
}
+ /* Assume we're mapping an L1 table, falling back to twisted L2. */
ret = get_l1_table(l2_pgentry_to_pagenr(l2_entry));
- if ( ret ) return(ret);
- p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
- ((i+1) * sizeof(l2_pgentry_t)));
+ if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
+ if ( ret ) goto out;
}
/* Now we simply slap in our high mapping. */
DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
mk_l2_pgentry(__pa(current->mm.perdomain_pt) | __PAGE_HYPERVISOR);
- return(ret);
+ out:
+ unmap_domain_mem(p_l2_entry);
+ return ret;
}
static int get_l1_table(unsigned long page_nr)
/* Update ref count for page pointed at by PDE. */
ret = inc_page_refcnt(page_nr, PGT_l1_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( ret != 0 ) return (ret < 0) ? ret : 0;
/* NEW level-1 page table! Deal with every PTE in the table. */
p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
MEM_LOG("Bad L1 page type settings %04lx",
l1_pgentry_val(l1_entry) &
(_PAGE_GLOBAL|_PAGE_PAT));
- return(-1);
+ ret = -1;
+ goto out;
}
ret = get_page(l1_pgentry_to_pagenr(l1_entry),
l1_pgentry_val(l1_entry) & _PAGE_RW);
- if ( ret ) return(ret);
+ if ( ret ) goto out;
}
- return(ret);
+ out:
+ /* Make sure we unmap the right page! */
+ unmap_domain_mem(p_l1_entry-1);
+ return ret;
}
static int get_page(unsigned long page_nr, int writeable)
}
page = frame_table + page_nr;
flags = page->flags;
- if ( (flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(flags) )
{
MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
return(-1);
return(0);
}
+/* Drop one reference to the L2 page table in frame 'page_nr'.  On final */
+/* release, also drop the reference held on each L1 table reachable from */
+/* a present domain-visible PDE.  Failures from dec_page_refcnt are not  */
+/* reported to the caller, hence the void return type.                   */
-static int put_l2_table(unsigned long page_nr)
+static void put_l2_table(unsigned long page_nr)
{
l2_pgentry_t *p_l2_entry, l2_entry;
- int i, ret;
+ int i;
- ret = dec_page_refcnt(page_nr, PGT_l2_page_table);
- if ( ret != 0 ) return((ret < 0) ? ret : 0);
+ if ( dec_page_refcnt(page_nr, PGT_l2_page_table) ) return;
/* We had last reference to level-2 page table. Free the PDEs. */
+ /* Only the domain-visible PDEs hold L1 references (get_l2_table only */
+ /* counted those), so stop short of the hypervisor entries. */
- for ( i = 0; i < HYPERVISOR_ENTRIES_PER_L2_PAGETABLE; i++ )
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
{
l2_entry = *p_l2_entry++;
if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
- {
put_l1_table(l2_pgentry_to_pagenr(l2_entry));
- p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
- ((i+1) * sizeof(l2_pgentry_t)));
- }
}
- return(0);
+ /* NOTE(review): p_l2_entry has advanced past the entries scanned; this */
+ /* assumes unmap_domain_mem() masks its argument down to the page base  */
+ /* -- confirm against its definition. */
+ unmap_domain_mem(p_l2_entry);
}
static void put_l1_table(unsigned long page_nr)
l1_pgentry_t *p_l1_entry, l1_entry;
int i;
- if ( dec_page_refcnt(page_nr, PGT_l1_page_table) != 0 ) return;
+ if ( dec_page_refcnt(page_nr, PGT_l1_page_table) ) return;
/* We had last reference to level-1 page table. Free the PTEs. */
p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
l1_pgentry_val(l1_entry) & _PAGE_RW);
}
}
+
+ /* Make sure we unmap the right page! */
+ unmap_domain_mem(p_l1_entry-1);
}
static void put_page(unsigned long page_nr, int writeable)
struct pfn_info *page;
ASSERT(page_nr < max_page);
page = frame_table + page_nr;
- ASSERT((page->flags & PG_domain_mask) == current->domain);
+ ASSERT(DOMAIN_OKAY(page->flags));
ASSERT((!writeable) ||
((page_type_count(page) != 0) &&
((page->flags & PG_type_mask) == PGT_writeable_page)));
goto fail;
}
- /*
- * Write the new value while pointer is still valid. The mapping cache
- * entry for p_l2_entry may get clobbered by {put,get}_l1_table.
- */
- *p_l2_entry = new_l2_entry;
-
if ( (l2_pgentry_val(new_l2_entry) & _PAGE_PRESENT) )
{
if ( (l2_pgentry_val(new_l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE)) )
put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
}
- if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) )
+ /* Assume we're mapping an L1 table, falling back to twisted L2. */
+ if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
+ get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
goto fail;
}
}
put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
}
- return(0);
+ *p_l2_entry = new_l2_entry;
+ unmap_domain_mem(p_l2_entry);
+ return 0;
fail:
- /*
- * On failure we put the old value back. We need to regrab the
- * mapping of the physical page frame.
- */
- p_l2_entry = map_domain_mem(pa);
- *p_l2_entry = old_l2_entry;
- return(-1);
+ unmap_domain_mem(p_l2_entry);
+ return -1;
}
l1_pgentry_val(old_l1_entry) & _PAGE_RW);
}
- /* p_l1_entry is still valid here */
*p_l1_entry = new_l1_entry;
+ unmap_domain_mem(p_l1_entry);
+ return 0;
- return(0);
fail:
- return(-1);
+ unmap_domain_mem(p_l1_entry);
+ return -1;
}
break;
case PGEXT_UNPIN_TABLE:
- if ( (page->flags & PG_domain_mask) != current->domain )
+ if ( !DOMAIN_OKAY(page->flags) )
{
err = 1;
MEM_LOG("Page %08lx bad domain (dom=%ld)",
case PGREQ_NORMAL:
page = frame_table + pfn;
flags = page->flags;
- if ( (flags & PG_domain_mask) == current->domain )
+ if ( DOMAIN_OKAY(flags) )
{
switch ( (flags & PG_type_mask) )
{
flags = page->flags;
if ( (flags | current->domain) == PGT_l1_page_table )
{
-
- *(unsigned long *)map_domain_mem(cur.ptr) = cur.val;
+ unsigned long *va = map_domain_mem(cur.ptr);
+ *va = cur.val;
+ unmap_domain_mem(va);
err = 0;
}
else